psr = ia64_clear_ic();
if ( cl == ISIDE_TLB ) {
- ia64_itc(1, mtlb.ifa, mtlb.page_flags, mtlb.ps);
+ ia64_itc(1, mtlb.ifa, mtlb.page_flags, IA64_ITIR_PS_KEY(mtlb.ps, 0));
}
else {
- ia64_itc(2, mtlb.ifa, mtlb.page_flags, mtlb.ps);
+ ia64_itc(2, mtlb.ifa, mtlb.page_flags, IA64_ITIR_PS_KEY(mtlb.ps, 0));
}
ia64_set_psr(psr);
ia64_srlz_i();
} else {
phy_pte &= ~PAGE_FLAGS_RV_MASK;
psr = ia64_clear_ic();
- ia64_itc(type + 1, va, phy_pte, itir_ps(itir));
+ ia64_itc(type + 1, va, phy_pte, itir);
ia64_set_psr(psr);
ia64_srlz_i();
}
u64 psr;
phy_pte &= ~PAGE_FLAGS_RV_MASK;
psr = ia64_clear_ic();
- ia64_itc(type + 1, ifa, phy_pte, ps);
+ ia64_itc(type + 1, ifa, phy_pte, IA64_ITIR_PS_KEY(ps, 0));
ia64_set_psr(psr);
ia64_srlz_i();
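
The hunks above rely on an ia64_itir_t overlay and an IA64_ITIR_PS_KEY() helper defined elsewhere in the series. As a point of reference, a minimal sketch that follows the architected ITIR layout (log2 page size in bits 7:2, protection key in bits 31:8) could read:

	typedef union {
		u64 itir;			/* raw ITIR image */
		struct {
			u64 rv3 :  2;		/* bits  1:0  - reserved */
			u64 ps  :  6;		/* bits  7:2  - log2 page size */
			u64 key : 24;		/* bits 31:8  - protection key */
			u64 rv4 : 32;		/* bits 63:32 - reserved */
		};
	} ia64_itir_t;

	/* Build an ITIR image from a log2 page size and a protection key. */
	#define IA64_ITIR_PS_KEY(ps, key) \
		(((u64)(ps) << 2) | ((u64)(key) << 8))

The real definitions may differ in naming; the point is that the whole register image, not just the page-size field, now travels through the call chain.
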
// ps < mrr.ps, this is not supported
unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL);
IA64FAULT fault;
int is_ptc_l_needed = 0;
- u64 logps;
+ ia64_itir_t _itir = {.itir = itir};
if ((isr & IA64_ISR_SP)
|| ((isr & IA64_ISR_NA)
struct p2m_entry entry;
unsigned long m_pteval;
m_pteval = translate_domain_pte(pteval, address, itir,
- &logps, &entry);
+ &(_itir.itir), &entry);
vcpu_itc_no_srlz(current, is_data ? 2 : 1, address,
- m_pteval, pteval, logps, &entry);
+ m_pteval, pteval, _itir.itir, &entry);
if ((fault == IA64_USE_TLB && !current->arch.dtlb.pte.p) ||
p2m_entry_retry(&entry)) {
		/* The dtlb has been purged in between and this entry
		   was the matching one.  Undo the work. */
- vcpu_flush_tlb_vhpt_range(address, logps);
+ vcpu_flush_tlb_vhpt_range(address, _itir.ps);
		// The stale entry which we inserted above
		// may remain in the TLB cache.
}
if (is_ptc_l_needed)
- vcpu_ptc_l(current, address, logps);
+ vcpu_ptc_l(current, address, _itir.ps);
if (!guest_mode(regs)) {
/* The fault occurs inside Xen. */
if (!ia64_done_with_exception(regs)) {
// address, convert the pte for a physical address for (possibly different)
// Xen PAGE_SIZE and return modified pte. (NOTE: TLB insert should use
// PAGE_SIZE!)
-u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* logps,
+u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* itir,
struct p2m_entry* entry)
{
struct domain *d = current->domain;
- ia64_itir_t itir = {.itir = itir__};
+ ia64_itir_t _itir = {.itir = itir__};
u64 mask, mpaddr, pteval2;
u64 arflags;
u64 arflags2;
	pteval &= ((1UL << 53) - 1); // ignore bits [63:53]
// FIXME address had better be pre-validated on insert
- mask = ~itir_mask(itir.itir);
+ mask = ~itir_mask(_itir.itir);
mpaddr = ((pteval & _PAGE_PPN_MASK) & ~mask) | (address & mask);
- if (itir.ps > PAGE_SHIFT)
- itir.ps = PAGE_SHIFT;
+ if (_itir.ps > PAGE_SHIFT)
+ _itir.ps = PAGE_SHIFT;
- *logps = itir.ps;
+	/* Hand back the whole ITIR image; ps was already clamped above
+	   and the protection key is preserved. */
+	((ia64_itir_t *)itir)->itir = _itir.itir;
pteval2 = lookup_domain_mpa(d, mpaddr, entry);
void
vcpu_itc_no_srlz(VCPU * vcpu, u64 IorD, u64 vaddr, u64 pte,
- u64 mp_pte, u64 logps, struct p2m_entry *entry)
+ u64 mp_pte, u64 itir, struct p2m_entry *entry)
{
+ ia64_itir_t _itir = {.itir = itir};
unsigned long psr;
- unsigned long ps = (vcpu->domain == dom0) ? logps : PAGE_SHIFT;
+ unsigned long ps = (vcpu->domain == dom0) ? _itir.ps : PAGE_SHIFT;
- check_xen_space_overlap("itc", vaddr, 1UL << logps);
+ check_xen_space_overlap("itc", vaddr, 1UL << _itir.ps);
// FIXME, must be inlined or potential for nested fault here!
- if ((vcpu->domain == dom0) && (logps < PAGE_SHIFT))
+ if ((vcpu->domain == dom0) && (_itir.ps < PAGE_SHIFT))
panic_domain(NULL, "vcpu_itc_no_srlz: domain trying to use "
"smaller page size!\n");
- BUG_ON(logps > PAGE_SHIFT);
+ BUG_ON(_itir.ps > PAGE_SHIFT);
vcpu_tlb_track_insert_or_dirty(vcpu, vaddr, entry);
psr = ia64_clear_ic();
pte &= ~(_PAGE_RV2 | _PAGE_RV1); // Mask out the reserved bits.
- ia64_itc(IorD, vaddr, pte, ps); // FIXME: look for bigger mappings
+ // FIXME: look for bigger mappings
+ ia64_itc(IorD, vaddr, pte, IA64_ITIR_PS_KEY(ps, _itir.key));
ia64_set_psr(psr);
	// ia64_srlz_i(); // no srlz req'd, will rfi later
if (vcpu->domain == dom0 && ((vaddr >> 61) == 7)) {
// addresses never get flushed. More work needed if this
// ever happens.
//printk("vhpt_insert(%p,%p,%p)\n",vaddr,pte,1L<<logps);
- if (logps > PAGE_SHIFT)
- vhpt_multiple_insert(vaddr, pte, logps);
+ if (_itir.ps > PAGE_SHIFT)
+ vhpt_multiple_insert(vaddr, pte, _itir.itir);
else
- vhpt_insert(vaddr, pte, logps << 2);
+ vhpt_insert(vaddr, pte, _itir.itir);
}
// even if domain pagesize is larger than PAGE_SIZE, just put
// PAGE_SIZE mapping in the vhpt for now, else purging is complicated
- else
- vhpt_insert(vaddr, pte, PAGE_SHIFT << 2);
+ else {
+ _itir.ps = PAGE_SHIFT;
+ vhpt_insert(vaddr, pte, _itir.itir);
+ }
}
IA64FAULT vcpu_itc_d(VCPU * vcpu, u64 pte, u64 itir, u64 ifa)
{
- unsigned long pteval, logps = itir_ps(itir);
+ unsigned long pteval;
BOOLEAN swap_rr0 = (!(ifa >> 61) && PSCB(vcpu, metaphysical_mode));
struct p2m_entry entry;
+ ia64_itir_t _itir = {.itir = itir};
- if (logps < PAGE_SHIFT)
+ if (_itir.ps < PAGE_SHIFT)
panic_domain(NULL, "vcpu_itc_d: domain trying to use "
"smaller page size!\n");
again:
//itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
- pteval = translate_domain_pte(pte, ifa, itir, &logps, &entry);
+ pteval = translate_domain_pte(pte, ifa, itir, &(_itir.itir), &entry);
if (!pteval)
return IA64_ILLOP_FAULT;
if (swap_rr0)
set_one_rr(0x0, PSCB(vcpu, rrs[0]));
- vcpu_itc_no_srlz(vcpu, 2, ifa, pteval, pte, logps, &entry);
+ vcpu_itc_no_srlz(vcpu, 2, ifa, pteval, pte, _itir.itir, &entry);
if (swap_rr0)
set_metaphysical_rr0();
if (p2m_entry_retry(&entry)) {
- vcpu_flush_tlb_vhpt_range(ifa, logps);
+ vcpu_flush_tlb_vhpt_range(ifa, _itir.ps);
goto again;
}
vcpu_set_tr_entry(&PSCBX(vcpu, dtlb), pte, itir, ifa);
IA64FAULT vcpu_itc_i(VCPU * vcpu, u64 pte, u64 itir, u64 ifa)
{
- unsigned long pteval, logps = itir_ps(itir);
+ unsigned long pteval;
BOOLEAN swap_rr0 = (!(ifa >> 61) && PSCB(vcpu, metaphysical_mode));
struct p2m_entry entry;
+ ia64_itir_t _itir = {.itir = itir};
- if (logps < PAGE_SHIFT)
+ if (_itir.ps < PAGE_SHIFT)
panic_domain(NULL, "vcpu_itc_i: domain trying to use "
"smaller page size!\n");
again:
//itir = (itir & ~0xfc) | (PAGE_SHIFT<<2); // ignore domain's pagesize
- pteval = translate_domain_pte(pte, ifa, itir, &logps, &entry);
+ pteval = translate_domain_pte(pte, ifa, itir, &(_itir.itir), &entry);
if (!pteval)
return IA64_ILLOP_FAULT;
if (swap_rr0)
set_one_rr(0x0, PSCB(vcpu, rrs[0]));
- vcpu_itc_no_srlz(vcpu, 1, ifa, pteval, pte, logps, &entry);
+ vcpu_itc_no_srlz(vcpu, 1, ifa, pteval, pte, _itir.itir, &entry);
if (swap_rr0)
set_metaphysical_rr0();
if (p2m_entry_retry(&entry)) {
- vcpu_flush_tlb_vhpt_range(ifa, logps);
+ vcpu_flush_tlb_vhpt_range(ifa, _itir.ps);
goto again;
}
vcpu_set_tr_entry(&PSCBX(vcpu, itlb), pte, itir, ifa);
// initialize cache too???
}
-void vhpt_insert (unsigned long vadr, unsigned long pte, unsigned long logps)
+void vhpt_insert (unsigned long vadr, unsigned long pte, unsigned long itir)
{
struct vhpt_lf_entry *vlfe = (struct vhpt_lf_entry *)ia64_thash(vadr);
unsigned long tag = ia64_ttag (vadr);
* because the processor may support speculative VHPT walk. */
vlfe->ti_tag = INVALID_TI_TAG;
wmb();
- vlfe->itir = logps;
+ vlfe->itir = itir;
vlfe->page_flags = pte | _PAGE_P;
*(volatile unsigned long*)&vlfe->ti_tag = tag;
}
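
Storing the caller's full itir in vlfe->itir works because the long-format VHPT entry carries an architected ITIR word that the hardware walker consumes verbatim, key field included. For orientation, the entry layout assumed by the code above is roughly as follows (field names taken from the code; the fourth, software-available word is a hypothetical collision-chain link):

	struct vhpt_lf_entry {
		unsigned long page_flags;	/* word 0: PTE bits */
		unsigned long itir;		/* word 1: ITIR image (ps + key) */
		unsigned long ti_tag;		/* word 2: translation tag */
		unsigned long CChain;		/* word 3: available to software */
	};

Previously only logps << 2 was written here, which silently dropped any protection key.
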
-void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte, unsigned long logps)
+void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte,
+ unsigned long itir)
{
- unsigned long mask = (1L << logps) - 1;
+ ia64_itir_t _itir = {.itir = itir};
+ unsigned long mask = (1L << _itir.ps) - 1;
int i;
- if (logps-PAGE_SHIFT > 10 && !running_on_sim) {
+ if (_itir.ps-PAGE_SHIFT > 10 && !running_on_sim) {
// if this happens, we may want to revisit this algorithm
panic("vhpt_multiple_insert:logps-PAGE_SHIFT>10,spinning..\n");
}
- if (logps-PAGE_SHIFT > 2) {
+ if (_itir.ps-PAGE_SHIFT > 2) {
// FIXME: Should add counter here to see how often this
// happens (e.g. for 16MB pages!) and determine if it
// is a performance problem. On a quick look, it takes
}
vaddr &= ~mask;
pte = ((pte & _PFN_MASK) & ~mask) | (pte & ~_PFN_MASK);
- for (i = 1L << (logps-PAGE_SHIFT); i > 0; i--) {
- vhpt_insert(vaddr,pte,logps<<2);
+ for (i = 1L << (_itir.ps-PAGE_SHIFT); i > 0; i--) {
+ vhpt_insert(vaddr, pte, _itir.itir);
vaddr += PAGE_SIZE;
}
}
* Insert a translation into the instruction and/or data translation
* cache.
*/
+#ifdef XEN
+static inline void
+ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte, __u64 itir)
+{
+ ia64_setreg(_IA64_REG_CR_ITIR, itir);
+ ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
+ ia64_stop();
+ /* as per EAS2.6, itc must be the last instruction in an instruction group */
+ if (target_mask & 0x1)
+ ia64_itci(pte);
+ if (target_mask & 0x2)
+ ia64_itcd(pte);
+}
+#else
static inline void
ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
__u64 log_page_size)
if (target_mask & 0x2)
ia64_itcd(pte);
}
+#endif
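
With the XEN variant above, ia64_itc() takes the complete ITIR image instead of a bare log2 page size, so protection keys survive the insert. A hypothetical call site (vaddr and pte stand in for an already-prepared mapping):

	u64 itir = IA64_ITIR_PS_KEY(PAGE_SHIFT, 0);	/* key 0 assumed */
	ia64_itc(0x2, vaddr, pte, itir);		/* 0x2: data TC only */
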
/*
* Purge a range of addresses from instruction and/or data translation
extern volatile unsigned long *mpt_table;
extern unsigned long gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);
-extern u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* logps, struct p2m_entry* entry);
+extern u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__,
+ u64* itir, struct p2m_entry* entry);
#define machine_to_phys_mapping mpt_table
#define INVALID_M2P_ENTRY (~0UL)
extern void vhpt_init (void);
extern void gather_vhpt_stats(void);
extern void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte,
- unsigned long logps);
+ unsigned long itir);
extern void vhpt_insert (unsigned long vadr, unsigned long pte,
- unsigned long logps);
+ unsigned long itir);
void local_vhpt_flush(void);
extern void vcpu_vhpt_flush(struct vcpu* v);